CarND-Advanced-Lane-Lines

Project Steps:

  1. Camera calibration
  2. Distortion correction
  3. Color/gradient threshold
  4. Perspective transform
  5. Detect lane lines
  6. Determine the lane curvature
  7. Drawing
  8. Pipeline (Video)

1. Camera calibration

In [44]:
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import glob
import toolbox
import pickle
%matplotlib inline
In [45]:
#Finding Corners in a calibration image
fname = './camera_cal/calibration1.jpg'
img = cv2.imread(fname)

# prepare object points
nx = 9#TODO: enter the number of inside corners in x
ny = 6#TODO: enter the number of inside corners in y

# Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Find the chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)

# If found, draw corners
if ret == True:
    # Draw and display the corners
    cv2.drawChessboardCorners(img, (nx, ny), corners, ret)
    plt.imshow(img)
    plt.title("Calibration image with corners")
In [46]:
#prepare object points
nx = 9#TODO: enter the number of inside corners in x
ny = 6#TODO: enter the number of inside corners in y

#read all images in folder
images = glob.glob('./camera_cal/calibration*.jpg')

objpoints = [] # 3D points in real world space
imgpoints = [] # 2D points in image plane

objp = np.zeros([ny*nx,3], np.float32)
objp[:,:2] = np.mgrid[0:nx,0:ny].T.reshape(-1,2) # x, y coordinates

# Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# If cornes are found, add object points, add image points and draw corners
for name in images:
    img = cv2.imread(name)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Find the chessboard corners
    ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
    if ret == True:
        imgpoints.append(corners)
        objpoints.append(objp)
In [47]:
#Undistorting a test image:
fname = './camera_cal/calibration1.jpg'
img = cv2.imread(fname)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

#Camera calibration, given object points, image points, and the shape of the grayscale image:
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
#ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img.shape[0:2], None, None)
undist = cv2.undistort(img, mtx, dist, None, mtx)
toolbox.plot2img(img,undist)
In [48]:
#Save Calibrate Parameters
coefficients = {'mtx': mtx, 'dist': dist}
with open('calibrate_camera.p', 'wb') as f:
    pickle.dump(coefficients, f)

2. Distortion correction

In [6]:
fname = './camera_cal/calibration4.jpg'
img = mpimg.imread(fname)
undist = cv2.undistort(img, mtx, dist, None, mtx)

f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
ax1.imshow(img)
ax1.set_title('Original Image', fontsize=50)
ax2.imshow(undist)
ax2.set_title('Undistorted Image', fontsize=50)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)

An example of a distortion-corrected image.

In [7]:
fname = './test_images/test1.jpg'
img = mpimg.imread(fname)

##Note: If you are reading in an image using mpimg.imread() 
##this will read in an RGB image and you should convert to grayscale using cv2.COLOR_RGB2GRAY
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)

##Another way to retrieve the image shape, is to get them directly 
##from the color image by retrieving the first two values in the color image shape array using img.shape[0:2].
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
undistorted = cv2.undistort(img, mtx, dist, None, mtx)

f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
ax1.imshow(img)
ax1.set_title('Original Image', fontsize=50)
ax2.imshow(undistorted)
ax2.set_title('Undistorted Image', fontsize=50)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)

3. Color & Gradient Threshold

In [8]:
# Define a function that applies Sobel x or y, 
# then takes an absolute value and applies a threshold.
# Note: calling your function with orient='x', thresh_min=5, thresh_max=100
# should produce output like the example image shown above this quiz.
import thresholding
In [56]:
# Edit this function to create your own pipeline.
def pipeline(img, mtx, dist):
    
    img = np.copy(img)
    
    ########################################################################################
    ##STEP: DISTORSION CORRECTION
    ########################################################################################
    undist = cv2.undistort(img, mtx, dist, None, mtx)

    ########################################################################################
    #STEP:  LANE FINDING
    ########################################################################################
       
    combined_binary, sxbinary, s_binary = thresholding.process_image_grad_color(img)
    
    ########################################################################################
    ########################################################################################

    
    ########################################################################################
    ##STEP: WARPER IMAGE
    #top_down, poly, _, _ = warper(combined_binary, mtx, dist, draw_poly=False)
    ########################################################################################
    
    #toolbox.plot3img(s_binary,sxbinary,color_binary)
    return undist, combined_binary
    

3.1 Color Thresholds:

Testing Color Thresholds in RGB Space

In [10]:
fname = './test_images/test1.jpg'

##Note: If you read in an image using matplotlib.image.imread() you will get an RGB image, 
##but if you read it in using OpenCV cv2.imread() this will give you a BGR image.

#Read BGR image
#img = cv2.imread(fname)

#Read RGB image
img = mpimg.imread(fname)
img = np.copy(img)

R = img[:, :, 0]
G = img[:, :, 1]

RG = np.dstack(( R, G, np.zeros_like(R)))

R  = np.dstack(( R, np.zeros_like(R), np.zeros_like(R)))

combined_img = thresholding.combine_grad(img)
toolbox.plot2img(img, combined_img,['Original', 'Gradient Thresholds in Channels RGB'])

combined_img = thresholding.combine_grad(R)
toolbox.plot2img(img, combined_img, ['Original', 'Gradient Thresholds in Channel R'])

combined_img = thresholding.combine_grad(RG)
toolbox.plot2img(img, combined_img, ['Original', 'Gradient Thresholds in Channels RG'])

3.2 Testing Combined Thresholds

  • Gradient thresholds using RGB Space
  • Color thresholds using HLS Space

Thresholding steps & functions in file thresholding.py.

In [11]:
fname = './test_images/test1.jpg'
#Read RGB image
img = mpimg.imread(fname)
undist, combined_binary = pipeline(img)

toolbox.plot2img(undist, combined_binary,['Undistorted Original', 'Thresholded Binary Image'])

# Plot the result

Now, I test combined thresholds over all images in test folder

In [12]:
#read all images in folder
images = glob.glob('./test_images/test*.jpg')
for fname in images:
    #Read RGB image
    img = mpimg.imread(fname)
    undist, combined_binary = pipeline(img)
    toolbox.plot2img(undist, combined_binary, ['Undistorted Original', 'Thresholded Binary Image'])

4. Perspective Transform

Notes: Pick four points in a trapezoidal shape (similar to region masking) that would represent a rectangle when looking down on the road from above.

The easiest way to do this is to investigate an image where the lane lines are straight, and find four points lying along the lines that, after perspective transform, make the lines look straight and vertical from a bird's eye view perspective.

4.1 Warper Function

In [17]:
#The code for my perspective transform includes a function called `warper()`, 
#which appears in the file `lines.py` 
# lines.warper(img)
import lines
In [50]:
fname = './test_images/test1.jpg'
#Read RGB image
img = mpimg.imread(fname)
undist = cv2.undistort(img, mtx, dist, None, mtx)
top_down, poly, _,_ = lines.warper(undist, mtx, dist, draw_poly=True)
toolbox.plot2img(poly, top_down, ['Original Image','Undistorted and Warped Image'])

5. Detect lane lines

Locate the Lane Lines and Fit a Polynomial

Notes: You now have a thresholded warped image and you're ready to map out the lane lines! There are many ways you could go about this, but here's one example of how you might do it:

Line Finding Method: Peaks in a Histogram

After applying calibration, thresholding, and a perspective transform to a road image, you should have a binary image where the lane lines stand out clearly. However, you still need to decide explicitly which pixels are part of the lines and which belong to the left line and which belong to the right line.

I first take a histogram along all the columns in the lower half of the image like this:

In [65]:
#read all images in folder
images = glob.glob('./test_images/test*.jpg')
for fname in images:
    img = mpimg.imread(fname)

    undist, combined_binary = pipeline(img,mtx, dist)
    top_down, poly, _, _ = lines.warper(combined_binary, mtx, dist, draw_poly=False)
    binary_warped = top_down[top_down.shape[0]//2:,:]

    toolbox.plot3img(combined_binary, top_down, binary_warped, ['Undistorted & Thresholded Image','Warped Image','Cropped Image'])

5.1 Histogram

In [25]:
histogram = np.sum(binary_warped, axis=0)
plt.plot(histogram)
plt.title('Histogram of Warped Image')
plt.show()

5.2 Implement Sliding Windows and Fit a Polynomial

In [26]:
#I use ‘fit_poly()’ function,
#which appears in the file lines.py
#lines.find_poly()
import lines
In [57]:
def fit_poly(binary_warped):
    # Assuming you have created a warped binary image called "binary_warped"
    # Take a histogram of the bottom half of the image
    histogram = np.sum(binary_warped[binary_warped.shape[0]//2:,:], axis=0)
    # Create an output image to draw on and  visualize the result
    out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
    # Find the peak of the left and right halves of the histogram
    # These will be the starting point for the left and right lines
    midpoint = np.int(histogram.shape[0]/2)
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint

    # Choose the number of sliding windows
    nwindows = 10
    # Set height of windows
    window_height = np.int(binary_warped.shape[0]/nwindows)
    # Identify the x and y positions of all nonzero pixels in the image
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Current positions to be updated for each window
    leftx_current = leftx_base
    rightx_current = rightx_base
    # Set the width of the windows +/- margin
    margin = 100
    # Set minimum number of pixels found to recenter window
    minpix = 50
    # Create empty lists to receive left and right lane pixel indices
    left_lane_inds = []
    right_lane_inds = []

    # Step through the windows one by one
    for window in range(nwindows):
        # Identify window boundaries in x and y (and right and left)
        win_y_low = binary_warped.shape[0] - (window+1)*window_height
        win_y_high = binary_warped.shape[0] - window*window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        # Draw the windows on the visualization image
        cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0), 2) 
        cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),(0,255,0), 2) 
        # Identify the nonzero pixels in x and y within the window
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
        # Append these indices to the lists
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # If you found > minpix pixels, recenter next window on their mean position
        if len(good_left_inds) > minpix:
            leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:        
            rightx_current = np.int(np.mean(nonzerox[good_right_inds]))

    # Concatenate the arrays of indices
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)

    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds] 
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds] 

    # Fit a second order polynomial to each
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    
    return out_img, nonzerox, nonzeroy, leftx, lefty, left_lane_inds, right_lane_inds, left_fit, right_fit

5.2.1 Visualization of Sliding Windows and a second order polynomial

5.3. Skip the sliding windows step once you know where the lines are

In [29]:
#I use ‘fit_line_with_line_info()’ function,
#which appears in the file lines.py
#lines.fit_line_with_line_info()
import lines

6. Determine the Lane Curvature

6.1 Function to calculate the radius of curvature of the lane and the position of the vehicle with respect to center

In [31]:
#Then I use ‘find_curvature()’ function,
#which appears in the file lines.py
#lines.find_curvature()
import lines
In [32]:
def find_curvature(left_lane_inds, right_lane_inds, nonzerox, nonzeroy, test=False):
        """
        Calculate radius of curvature in meters
        """
        y_eval = 719  # 720p video/image, so last (lowest on screen) y index is 719

        # Define conversions in x and y from pixels space to meters
        ym_per_pix = 30/720 # meters per pixel in y dimension
        xm_per_pix = 3.7/700 # meters per pixel in x dimension

        # Extract left and right line pixel positions
        leftx = nonzerox[left_lane_inds]
        lefty = nonzeroy[left_lane_inds]
        rightx = nonzerox[right_lane_inds]
        righty = nonzeroy[right_lane_inds]

        # Fit new polynomials to x,y in world space
        left_fit_cr = np.polyfit(lefty*ym_per_pix, leftx*xm_per_pix, 2)
        right_fit_cr = np.polyfit(righty*ym_per_pix, rightx*xm_per_pix, 2)
        # Calculate the new radii of curvature
        left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
        right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
        # Now our radius of curvature is in meters
        if test:
            print('Radius of curvature', left_curverad, 'meters','right_curverad', right_curverad, 'meters')
        return left_curverad, right_curverad

6.2 Function to calculate the position of the vehicle with respect to center

In [33]:
def find_vehicle_offset(undist, left_fit, right_fit, test=False):
        """
        Calculate vehicle offset from lane center, in meters
        """
        # Calculate vehicle center offset in pixels
        bottom_y = undist.shape[0] - 1
        bottom_x_left = left_fit[0]*(bottom_y**2) + left_fit[1]*bottom_y + left_fit[2]
        bottom_x_right = right_fit[0]*(bottom_y**2) + right_fit[1]*bottom_y + right_fit[2]
        vehicle_offset = undist.shape[1]/2 - (bottom_x_left + bottom_x_right)/2

        # Convert pixel offset to meters
        xm_per_pix = 3.7/700 # meters per pixel in x dimension
        vehicle_offset *= xm_per_pix
        if test:    
            print('Vehicle offset: ', vehicle_offset, 'meters')
        return vehicle_offset
In [59]:
#read all images in folder
images = glob.glob('./test_images/test*.jpg')
for fname in images:
    #Read RGB image
    img = mpimg.imread(fname)
    undist, combined_binary = pipeline(img, mtx, dist)
    binary_warped, poly, _, _ = lines.warper(combined_binary, mtx, dist, draw_poly=False)
    ############################################################
    ##Fit polyline to the Lane Curvature
    ############################################################
    out_img, nonzerox, nonzeroy,leftx, lefty, left_lane_inds,right_lane_inds, left_fit, right_fit = fit_poly(binary_warped)
    ############################################################
    ##Determine the Lane Curvature
    ############################################################
    find_curvature(left_lane_inds, right_lane_inds, nonzerox, nonzeroy, test=True)
    
    ############################################################
    ##Determine  position of the vehicle with respect to center
    ############################################################
    find_vehicle_offset(undist, left_fit, right_fit, test=True)
Radius of curvature 1186.16471683 meters right_curverad 1759.36698822 meters
Vehicle offset:  -0.089949348335 meters
Radius of curvature 795.731225756 meters right_curverad 543.043058251 meters
Vehicle offset:  -0.170335321614 meters
Radius of curvature 1499.24010017 meters right_curverad 1191.02971628 meters
Vehicle offset:  -0.088022506257 meters
Radius of curvature 5173.36192065 meters right_curverad 449.054884781 meters
Vehicle offset:  -0.296129987931 meters
Radius of curvature 517.658527574 meters right_curverad 18213.2496959 meters
Vehicle offset:  -0.0246654316551 meters
Radius of curvature 1110.25019003 meters right_curverad 653.520009487 meters
Vehicle offset:  -0.251002434546 meters

7. Drawing

In [60]:
def draw_poly(undist, warped, right_fit , left_fit, left_curve, right_curve, vehicle_offset):
    
    # Generate x and y values for plotting
    ploty = np.linspace(0, warped.shape[0]-1, warped.shape[0] )
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]

    # Create an image to draw the lines on
    warp_zero = np.zeros_like(warped).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))

    # Recast the x and y points into usable format for cv2.fillPoly()
    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    pts = np.hstack((pts_left, pts_right))

    # Draw the lane onto the warped blank image
    cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))

    # Warp the blank back to original image space using inverse perspective matrix (Minv)
    newwarp = cv2.warpPerspective(color_warp, Minv, (image.shape[1], image.shape[0])) 
    # Combine the result with the original image
    result = cv2.addWeighted(undist, 1, newwarp, 0.3, 0)
    
    ##
    # Annotate lane curvature values and vehicle offset from center
    avg_curve = (left_curve + right_curve)/2
    label_str = 'Radius of curvature: %.1f m' % avg_curve
    result = cv2.putText(result, label_str, (30,40), 0, 1, (0,0,0), 2, cv2.LINE_AA)

    label_str = 'Vehicle offset from lane center: %.1f m' % vehicle_offset
    result = cv2.putText(result, label_str, (30,70), 0, 1, (0,0,0), 2, cv2.LINE_AA)

    return result
In [68]:
image_names = glob.glob('./test_images/test*.jpg')
for f in image_names:
    ##Test Draw images
    #left_curve, right_curve, vehicle_offset = 444.01 , 488.01 , 0.1 ,
    #fname = './test_images/test4.jpg'
    #Read RGB image
    img = mpimg.imread(f)

    undist, combined_binary = pipeline(img,mtx, dist)
    top_down, poly, _, _ = lines.warper(combined_binary, mtx, dist, draw_poly=False)
    out_img, nonzerox, nonzeroy,leftx, lefty, left_lane_inds, right_lane_inds, left_fit, right_fit = fit_poly(top_down)
    left_curve, right_curve = find_curvature(left_lane_inds, right_lane_inds, nonzerox, nonzeroy, test=True)   
    vehicle_offset = find_vehicle_offset(undist, left_fit, right_fit, test=True)
    result = draw_poly(undist, top_down, right_fit , left_fit, left_curve, right_curve, vehicle_offset)

    toolbox.plot3img(combined_binary, out_img, result, ['Thresholded Binary Image', 'Lane Lines','Combined Original'])
    #toolbox.plot2img(combined_binary,result,['Thresholded Binary Image','Combined Original'])
Radius of curvature 1186.16471683 meters right_curverad 1759.36698822 meters
Vehicle offset:  -0.089949348335 meters
Radius of curvature 795.731225756 meters right_curverad 543.043058251 meters
Vehicle offset:  -0.170335321614 meters
Radius of curvature 1499.24010017 meters right_curverad 1191.02971628 meters
Vehicle offset:  -0.088022506257 meters
Radius of curvature 5173.36192065 meters right_curverad 449.054884781 meters
Vehicle offset:  -0.296129987931 meters
Radius of curvature 517.658527574 meters right_curverad 18213.2496959 meters
Vehicle offset:  -0.0246654316551 meters
Radius of curvature 1110.25019003 meters right_curverad 653.520009487 meters
Vehicle offset:  -0.251002434546 meters
In [69]:
# Read camera calibration coefficients
with open('calibrate_camera.p', 'rb') as f:
    coefficients = pickle.load(f)
mtx = coefficients['mtx']
dist = coefficients['dist']
In [76]:
def process_image(img, test=False):
    undist, combined_binary = pipeline(img,mtx, dist)
    top_down, poly, M, Minv = lines.warper(combined_binary, mtx, dist, draw_poly=False)
    ############################################################
    ##Fit polyline to the Lane Curvature
    ############################################################
    out_img, nonzerox, nonzeroy,leftx, lefty, left_lane_inds, right_lane_inds, left_fit, right_fit = fit_poly(top_down)
    ############################################################
    ##Determine the Lane Curvature
    ############################################################
    left_curve, right_curve = find_curvature(left_lane_inds, right_lane_inds, nonzerox, nonzeroy, test=test)   
    ############################################################
    ##Determine  position of the vehicle with respect to center
    ############################################################
    vehicle_offset = find_vehicle_offset(undist, left_fit, right_fit, test=test) 
    ############################################################
    ##Drawing info
    ############################################################
    result = draw_poly(undist, top_down, right_fit , left_fit, left_curve, right_curve, vehicle_offset)

    if (test):
        toolbox.plot3img(combined_binary, out_img, result, ['Thresholded Binary Image', 'Lane Lines','Combined Original'])
    return result
In [77]:
#read all images in folder
image_names = glob.glob('./test_images/test*.jpg')
for f in image_names:
    #Read RGB image
    img = mpimg.imread(f)
    process_image(img,test=True)
Radius of curvature 1186.16471683 meters right_curverad 1759.36698822 meters
Vehicle offset:  -0.089949348335 meters
Radius of curvature 795.731225756 meters right_curverad 543.043058251 meters
Vehicle offset:  -0.170335321614 meters
Radius of curvature 1499.24010017 meters right_curverad 1191.02971628 meters
Vehicle offset:  -0.088022506257 meters
Radius of curvature 5173.36192065 meters right_curverad 449.054884781 meters
Vehicle offset:  -0.296129987931 meters
Radius of curvature 517.658527574 meters right_curverad 18213.2496959 meters
Vehicle offset:  -0.0246654316551 meters
Radius of curvature 1110.25019003 meters right_curverad 653.520009487 meters
Vehicle offset:  -0.251002434546 meters

8. Pipeline (Video)

In [79]:
### Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
# Set up lines for left and right
#left_lane = lines.Line()
#right_lane = lines.Line()
file_output = 'challenge_video_output_v06.mp4'
clip1 = VideoFileClip("challenge_video.mp4")
white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
%time white_clip.write_videofile(file_output, audio=False)
[MoviePy] >>>> Building video challenge_video_output_v06.mp4
[MoviePy] Writing video challenge_video_output_v06.mp4
100%|██████████| 485/485 [03:04<00:00,  2.84it/s]
[MoviePy] Done.
[MoviePy] >>>> Video ready: challenge_video_output_v06.mp4 

CPU times: user 2min 32s, sys: 50.2 s, total: 3min 23s
Wall time: 3min 6s
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 

5.4 Sliding Window Search with Convolutions

In [ ]: